Xiang Zhai
Aug 20, 2018
where
two-phase: $S_w+S_o=1$
relative permeability: $K_{r\sigma} = K_{\sigma}S_{\sigma}^n$, $\sigma = o,w$
ratio of rel perm $K_{ro} = \alpha K_{rw}$
ratio of viscosity $\mu_o = \beta \mu_w$
ratio $\gamma = \frac{\alpha}{\beta}$
net source term $Q_\sigma = q_\sigma/\rho_\sigma$
Absorb $k_w/\mu_w$ term into $K(x)$ and we have
$$\nabla\cdot[K(x)(\gamma (1-S_w)^n+S_w^n)\nabla P]=-(Q_o+Q_w)$$
and
$$\phi\frac{\partial S_w}{\partial t} - \nabla\cdot[K(x)S_w^n\nabla P] = Q_w$$
For 2D case, set
Boundary condition (rate)
Injection at $(0,0)$
Production at $(\pm0.75,\pm0.75)$
Initial condition
Known rates: $Q_{prod}$ and $Q_{inj}$
$Q_w = Q_{inj} + Q_{prod}\times F_w$, $F_w = \frac{u_w}{u_w+u_o}=\frac{S_w^n}{\gamma (1-S_w)^n+S_w^n}$, $Q_o = Q_{prod}\times (1-F_w)$
$Q_o+Q_w = Q_{inj}+Q_{prod}$
Let $$v_x = -K\partial_x P$$
and
$$v_y = -K\partial_y P$$
The equations we need to solve are converted to
$$\partial_x[(\gamma (1-S_w)^n+S_w^n)v_x] + \partial_y[(\gamma (1-S_w)^n+S_w^n)v_y]=Q_o+Q_w$$
and
$$\phi\frac{\partial S_w}{\partial t} + \partial_x[S_w^nv_x] + \partial_y[S_w^nv_y] = Q_w$$
A third constraint comes from the fact that $(-v_x/K,-v_y/K)=(\partial_xP,\partial_yP)$ is a conservative field (as the gradient of $P$), so that
$$\partial_y v_x - \partial_x v_y = v_x\,\partial_y \ln K - v_y\,\partial_x \ln K$$
In uniform case, $K$ has no spatial dependence and the equation becomes
$$\partial_y v_x - \partial_x v_y = 0$$
Note:
- I used $v_x$ and $v_y$ in the equations instead of pressure $p$. Because I introduced one more variable, I have to add one more equation, which is the curl-free equation.
- I didn't test whether this is helpful. My naive thinking was that by using $v_x$ and $v_y$, all the derivatives in the equations become first order. In Adam, the gradient will contain the second derivatives of $v_x$, $v_y$ and $s$, and in BFGS third derivatives will be involved. If we use $p$ instead of $v_x$ and $v_y$, we only need to solve two equations, but computing the Adam and BFGS updates will involve third- and fourth-order derivatives of $p$. So I don't really know which approach is easier.
- One benefit of using $v_x$ and $v_y$ is that the initial and boundary conditions are easier to impose for a closed-boundary problem.
#!pip install -q pyDOE
import tensorflow as tf
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import scipy.io
from scipy.interpolate import griddata
import time
from pyDOE import lhs
import matplotlib.animation
# Fix RNG seeds so collocation sampling and NN initialization are reproducible.
np.random.seed(1234)
tf.set_random_seed(1234)
%matplotlib inline
#%matplotlib notebook
# Require a GPU: training the PINN below is impractical on CPU.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
class PhysicsInformedNN:
    """Physics-informed neural network for 2D two-phase (water/oil) flow.

    The network maps (x, y, t) -- plus one squared-distance feature per
    well -- to (vx, vy, sw).  The loss combines data mismatch on
    boundary/initial rows with the residuals of the pressure equation,
    the saturation transport equation, and the curl-free constraint,
    evaluated at every supplied point.
    """
    # Initialize the class
    def __init__(self, XYT, U, Indices, layers, WELLs, PermModel, gamma, phi, relpermn):
        #XYT: n by 3 arrays, columns: x, y, t
        #U: n by 1 arrays storing the value of physics quantities to enforce (initial condition, boundary conditions, etc)
        #Indices: storing the indices of U that gives values of vx, vy and sw to match in the training process
        #PermModel: a permeability model
        #WELL: list of wells with location and rate [{x: x coord, y: y coord, Q: rate}, {}, ..] positive rate: injector, negative rate: producer
        self.WR = 0.04 #radius of well impacting area
        self.Walpha = 0.5/self.WR/self.WR #1/2sigma^2 of the Gaussian source smearing
        self.gamma, self.phi, self.relpermn = gamma, phi, relpermn
        self.WELLs = WELLs
        self.PermModel = PermModel
        # Row ranges into XYT/U for each enforced quantity; rows outside all
        # ranges are pure physics (collocation) points.
        vxi,vxj, vyi, vyj, swi, swj = Indices['vx']['begin'], Indices['vx']['end'],\
            Indices['vy']['begin'], Indices['vy']['end'],\
            Indices['sw']['begin'], Indices['sw']['end']
        self.x = XYT[:,0:1]
        self.y = XYT[:,1:2]
        self.t = XYT[:,2:3]
        self.u = U
        self.dlnk = self.PermModel.getdlnKdxy(XYT[:,0],XYT[:,1]) #get dlnK/dx and dlnK/dy at given locations
        self.layers = layers
        # Input width is forced to 3 coordinates + one well-distance feature per well.
        self.layers[0] = 3 + len(self.WELLs)
        # Per-column bounds used to normalize inputs to [-1, 1] in neural_net.
        self.lb = XYT.min(axis=0)
        self.ub = XYT.max(axis=0)
        # Initialize NNs
        self.weights, self.biases = self.initialize_NN(layers)
        # tf placeholders and graph
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                     log_device_placement=True))
        self.x_tf = tf.placeholder(tf.float32, shape=[None, 1], name='x')
        self.y_tf = tf.placeholder(tf.float32, shape=[None, 1], name='y')
        self.t_tf = tf.placeholder(tf.float32, shape=[None, 1], name='t')
        self.u_tf = tf.placeholder(tf.float32, shape=[None, 1], name='u')
        self.dlnk_tf = tf.placeholder(tf.float32, shape=[None, self.dlnk.shape[1]], name='dlnK')
        self.U_pred = \
            self.net_u(self.x_tf, self.y_tf, self.t_tf, self.dlnk_tf)
        # Data-mismatch terms: each selects the rows where that quantity is known.
        self.loss_vx = tf.reduce_mean(tf.square(self.U_pred[vxi:vxj,0:1] - self.u_tf[vxi:vxj,0:1])) #enforce vx
        self.loss_vy = tf.reduce_mean(tf.square(self.U_pred[vyi:vyj,1:2] - self.u_tf[vyi:vyj,0:1])) #enforce vy
        self.loss_sw = tf.reduce_mean(tf.square(self.U_pred[swi:swj,2:3] - self.u_tf[swi:swj,0:1])) #enforce sw
        # PDE residual terms, enforced at every supplied point.
        self.loss_fp = tf.reduce_mean(tf.square(self.U_pred[:,3:4])) #enforce fp
        self.loss_fs = tf.reduce_mean(tf.square(self.U_pred[:,4:5])) #enforce fs
        self.loss_fv = tf.reduce_mean(tf.square(self.U_pred[:,5:6])) #enforce fv
        self.loss = self.loss_sw + self.loss_vx + self.loss_vy + self.loss_fp + self.loss_fs + self.loss_fv
        # L-BFGS-B refinement pass used after the Adam warm-up (see train()).
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                                method = 'L-BFGS-B',
                                                                options = {'maxiter': 1000,
                                                                           'maxfun': 50000,
                                                                           'maxcor': 80,
                                                                           'maxls': 80,
                                                                           'ftol' : 2.0 * np.finfo(float).eps})
        self.optimizer_Adam = tf.train.AdamOptimizer(learning_rate = 0.0005)
        self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
        init = tf.global_variables_initializer()
        self.sess.run(init)

    def initialize_NN(self, layers):
        """Create Xavier-initialized weight matrices and zero bias rows per layer."""
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0,num_layers-1):
            W = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append(W)
            biases.append(b)
        return weights, biases

    def xavier_init(self, size):
        """Xavier/Glorot normal initializer for a [in_dim, out_dim] weight variable."""
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)

    def neural_net(self, XYT, weights, biases):
        """Forward pass: normalized (x, y, t) plus per-well squared distances -> raw outputs."""
        num_layers = len(weights) + 1
        # Squared distance from each sample point to each well location.
        r_list = []
        for well in self.WELLs:
            r_list.append(tf.reduce_sum(tf.pow(XYT[:,0:2]-tf.constant([well['x'],well['y']],shape=[1,2]),2),axis=1))
        R = tf.stack(r_list, axis=1)
        # Rescale (x, y, t) to [-1, 1] and append the well-distance features.
        H = tf.concat([2.0*(XYT - self.lb)/(self.ub - self.lb) - 1.0, R],1)
        for l in range(0,num_layers-2):
            W = weights[l]
            b = biases[l]
            # Earlier experiment with mixed leaky_relu/tanh activations, kept for reference:
            #if (l<num_layers-6) and (l%2==0):
            #    H = tf.nn.leaky_relu(tf.add(tf.matmul(H, W), b))
            #else:
            H = tf.tanh(tf.add(tf.matmul(H, W), b))
        # Final layer is linear.
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b)
        return Y

    def net_u(self, x, y, t, dlnkxy):
        """Build the prediction + residual graph; returns [vx, vy, sw, f_p, f_s, f_v]."""
        u = self.neural_net(tf.concat([x,y,t],1), self.weights, self.biases)
        #u = self.neural_net(tf.concat([x,y,t],1), self.weights, self.biases)
        # Sigmoid keeps the predicted saturation in (0, 1).
        vx, vy, sw = u[:,0:1], u[:,1:2], tf.sigmoid(u[:,2:3])
        # Total mobility gamma*(1-sw)^n + sw^n, with cheap shortcuts for n == 1.
        if self.relpermn==1:
            if self.gamma==1:
                mobility = 1
            else:
                mobility = self.gamma*(1-sw) + sw
        else:
            mobility = self.gamma*tf.pow(1-sw,self.relpermn)+tf.pow(sw,self.relpermn)
        # Fractional flow of water.
        Fw = tf.pow(sw, self.relpermn)/mobility
        # Each well's rate is smeared into a Gaussian source of width ~self.WR.
        Q_prod = 0
        Q_inj = 0
        for well in self.WELLs:
            if well['rate']>0: #injector
                Q_inj = Q_inj + well['rate']*self.Walpha/3.1415927* \
                    tf.exp(-self.Walpha*(tf.pow(x- well['x'],2)+tf.pow(y-well['y'],2)))
            else:
                Q_prod = Q_prod + well['rate']*self.Walpha/3.1415927* \
                    tf.exp(-self.Walpha*(tf.pow(x-well['x'],2)+tf.pow(y-well['y'],2)))
        # Q_o is computed for reference but not used in the residuals below.
        Q_o = Q_prod*(1-Fw)
        Q_w = Q_inj + Q_prod*Fw
        # Pressure equation: div(mobility * v) = Q_inj + Q_prod.
        p_eqn_LHS = tf.gradients(mobility*vx,x)[0]+tf.gradients(mobility*vy,y)[0]
        p_eqn_RHS = Q_inj + Q_prod
        # Saturation transport: phi*ds/dt + div(sw^n * v) = Q_w.
        s_eqn_LHS = self.phi*tf.gradients(sw,t)[0] \
            + tf.gradients(tf.pow(sw,self.relpermn)*vx,x)[0] \
            + tf.gradients(tf.pow(sw,self.relpermn)*vy,y)[0]
        s_eqn_RHS = Q_w
        # Curl-free constraint; the non-uniform-K RHS is currently disabled,
        # i.e. the residual assumes spatially uniform K.
        v_eqn_LHS = tf.gradients(vx,y)[0] - tf.gradients(vy,x)[0]
        v_eqn_RHS = 0#tf.multiply(dlnkxy[:,0:1],vx) - tf.multiply(dlnkxy[:,1:2],vy)
        f_p = p_eqn_LHS - p_eqn_RHS
        f_s = s_eqn_LHS - s_eqn_RHS
        f_v = v_eqn_LHS - v_eqn_RHS
        return tf.concat([vx,vy,sw,f_p,f_s,f_v],1)

    def callback(self, loss, loss_sw, loss_vx, loss_vy, loss_fp, loss_fs, loss_fv):
        """L-BFGS progress callback: print the loss breakdown every 50 iterations."""
        self.iter += 1
        if (self.iter % 50 == 0):
            print('Iter: %05d, Loss: %.2e, sw: %.2e, vx: %.2e, vy: %.2e, fp: %.2e, fs: %.2e, fv: %.2e' % \
                (self.iter, loss, loss_sw, loss_vx, loss_vy, loss_fp, loss_fs, loss_fv))

    def train(self,nIter_adam):
        """Run nIter_adam Adam steps, then one L-BFGS-B minimize pass."""
        tf_dict = {self.x_tf: self.x,
                   self.y_tf: self.y,
                   self.t_tf: self.t,
                   self.u_tf: self.u,
                   self.dlnk_tf: self.dlnk}
        start_t= time.time()
        for it in range(nIter_adam):
            self.sess.run(self.train_op_Adam, tf_dict)
            # Print
            if it % 50 == 0:
                elapsed = time.time() - start_t
                loss_value = self.sess.run(self.loss, tf_dict)
                print('It: %d, Loss: %.3e, Time: %.2f' %
                      (it, loss_value, elapsed))
                start_t = time.time()
        # Reset the iteration counter used by callback(), then hand off to L-BFGS-B.
        self.iter = 0
        self.optimizer.minimize(self.sess,
                                feed_dict = tf_dict,
                                fetches = [self.loss, self.loss_sw, self.loss_vx, self.loss_vy, self.loss_fp, self.loss_fs, self.loss_fv],
                                loss_callback = self.callback)

    def predict(self, XYT):
        """Evaluate the network at rows (x, y, t); returns six (n, 1) arrays:
        vx, vy, sw, f_p, f_s, f_v."""
        dlnkxy = self.PermModel.getdlnKdxy(XYT[:,0],XYT[:,1])
        U = self.sess.run(self.U_pred, {self.x_tf: XYT[:,0:1], self.y_tf: XYT[:,1:2], self.t_tf: XYT[:,2:3], self.dlnk_tf: dlnkxy})
        return U[:,0:1], U[:,1:2], U[:,2:3], U[:,3:4], U[:,4:5], U[:,5:6]
class Permeability:
    """Random permeability field K(x, y) on a regular grid.

    K is drawn from a Gaussian random field with isotropic power spectrum
    k**alpha and rescaled into [0.1, 2.1] so it stays strictly positive.
    ln K and its spatial gradients are precomputed for use as the
    curl-free-equation source term.
    """
    def __init__(self, x, y, alpha=-3.5, verbose=True):
        """x, y: 1D uniformly spaced coordinate arrays defining the grid."""
        self.x, self.y = x, y
        self.nx, self.ny = len(self.x), len(self.y)
        self.x0, self.dx = x[0], x[1] - x[0]
        self.y0, self.dy = y[0], y[1] - y[0]
        self.X, self.Y = np.meshgrid(self.x, self.y)
        K = self.gaussian_random_field(Pk=lambda k: k**alpha, nx=self.nx, ny=self.ny)
        # Rescale the raw field into [0.1, 2.1].
        self.K = (K - K.min())/(K.max() - K.min())*2.0 + 0.1
        self.lnK = np.log(self.K)
        self.dlnKdX = np.gradient(self.lnK, self.x, axis=1)
        self.dlnKdY = np.gradient(self.lnK, self.y, axis=0)
        if verbose:
            self.draw()

    def fftIndgen(self, n):
        """Frequency indices paired with the fft2 output bins.

        NOTE(review): for odd n this yields only n-1 indices, so the last
        row/column of the amplitude grid stays zero; preserved as-is to
        keep the generated fields unchanged.
        """
        return range(-(n//2 - 1), n//2 + 1)

    def gaussian_random_field(self, Pk=lambda k: k**-3.0, nx=100, ny=100):
        """Generate an (ny, nx) random field with isotropic spectrum Pk(k)."""
        def Pk2(kx, ky):
            # Amplitude of one Fourier mode; zero out the DC component.
            if kx == 0 and ky == 0:
                return 0.0
            return np.sqrt(Pk(np.sqrt(kx**2 + ky**2)))
        noise = np.fft.fft2(np.random.normal(size=(ny, nx)))
        amplitude = np.zeros((ny, nx))
        for i, ky in enumerate(self.fftIndgen(ny)):
            for j, kx in enumerate(self.fftIndgen(nx)):
                amplitude[i, j] = Pk2(kx, ky)
        return np.abs(np.fft.ifft2(noise * amplitude))

    def getK(self, x, y):
        """Nearest-cell (floor) lookup of K at arbitrary coordinates."""
        jx = np.array(np.floor((x - self.x0)/self.dx), dtype=int)
        iy = np.array(np.floor((y - self.y0)/self.dy), dtype=int)
        return self.K[iy, jx]

    def getdlnKdxy(self, x, y):
        """Return an (n, 2) array of [dlnK/dx, dlnK/dy] at the given coordinates."""
        jx = np.array(np.floor((x - self.x0)/self.dx), dtype=int)
        iy = np.array(np.floor((y - self.y0)/self.dy), dtype=int)
        dlnkx = self.dlnKdX[iy, jx]
        dlnky = self.dlnKdY[iy, jx]
        return np.c_[dlnkx, dlnky]

    def draw(self):
        """Plot K, ln K and the two ln K gradient components in a 2x2 panel."""
        plt.figure(figsize=(10, 10))
        for i, dataname in enumerate(['K', 'lnK', 'dlnKdX', 'dlnKdY']):
            ax = plt.subplot(2, 2, i + 1)
            plt.pcolor(self.X, self.Y, getattr(self, dataname), cmap='jet')
            plt.xlabel('x'), plt.ylabel('y')
            # Bug fix: use the instance grid, not module-level globals x and y.
            plt.axis([self.x.min(), self.x.max(), self.y.min(), self.y.max()])
            plt.axis('square')
            plt.colorbar()
            plt.title(dataname)
        plt.show()
def sample_nearWell(n=64, radius=0.03, xy0=(0.0, 0.0)):
    """Sample points clustered around a well at xy0.

    Points are the well center plus concentric rings whose radii follow a
    2D-Gaussian radial profile of scale `radius`.  Returns an (m, 2) array
    with m = 1 + (n_layer - 1) * n_theta, which is close to (but not
    exactly) the requested n.

    Note: the default for xy0 is now an immutable tuple (was a mutable
    list default); broadcasting behavior is unchanged.
    """
    def sample_1d_gaussian(m):
        # Radii at which the 2D Gaussian CDF is uniform: r = sqrt(-2 ln F).
        # F[0] = 0 and F[-1] = 1 are dropped (log(0) and r = 0).
        F = np.linspace(0, 1, m + 1)
        return np.sqrt(-2.0*np.log(F[1:-1]))
    n_theta = 15
    n_layer = int(np.floor((n - 1)/n_theta)) + 1
    n_theta = int(np.ceil((n - 1)/(n_layer - 1)))
    theta = np.linspace(0, 2*np.pi, n_theta + 1)
    theta = theta[:-1]  # drop duplicated angle 2*pi
    R = sample_1d_gaussian(n_layer)*radius
    xy = [np.array([[0], [0]])]  # well center itself
    for r in R:
        xy.append(np.array([r*np.cos(theta), r*np.sin(theta)]))
    xy = np.concatenate(xy, axis=1).T + np.asarray(xy0)
    return xy
def computePressure(x, y, Px, Py):
    """Reconstruct pressure P (up to an additive constant) from its gradient.

    Px, Py are dP/dx and dP/dy sampled on the tensor grid (y rows, x
    columns).  Py is integrated down the left edge (x = x[0]) and Px along
    each row; the result is shifted to zero mean.

    Bug fix: the original called scipy.integrate.cumtrapz while only
    scipy.io was imported at file level; the submodule is now imported
    explicitly.  cumulative_trapezoid is preferred since cumtrapz was
    deprecated and later removed from SciPy.
    """
    import scipy.integrate
    cumtrapz = getattr(scipy.integrate, 'cumulative_trapezoid', None)
    if cumtrapz is None:
        cumtrapz = scipy.integrate.cumtrapz  # pre-1.6 SciPy fallback
    # Pressure along the left edge, as a column vector.
    P_left = cumtrapz(Py[:, 0], y, initial=0).reshape((len(y)), 1)
    # Integrate Px along x for each row, then offset by the left-edge values.
    P = cumtrapz(Px, x, axis=1, initial=0)
    P = P + P_left.repeat(len(x), axis=1)
    P = P - P.mean()
    return P
def draw_prediction(t, n=129):
    """Plot the model's predicted fields at time t on an n-by-n grid over [-1,1]^2.

    Panels: vx, vy, log|v|, permeability K, water saturation S, the
    pressure reconstructed from (vx, vy), and the residuals of the
    pressure/saturation/velocity equations.  Relies on the module-level
    `model` (trained PhysicsInformedNN) and `Kmodel` (Permeability).
    """
    x_star = np.linspace(-1, 1, n)
    y_star = np.linspace(-1, 1, n)
    X_star, Y_star = np.meshgrid(x_star, y_star)
    K = Kmodel.getK(X_star, Y_star)
    XYT_star = np.c_[X_star.flatten(), Y_star.flatten(), t*np.ones((X_star.size, 1))]
    t1 = time.time()
    vx_pred, vy_pred, s_pred, fp_pred, fs_pred, fv_pred = model.predict(XYT_star)
    print('Took %f second to compute %d data points at time %f' % (time.time()-t1, x_star.size*y_star.size, t))
    xrange = [x_star[0], x_star[-1]]
    # Bug fix: yrange was previously built from x_star (harmless only while
    # the x and y grids happen to be identical).
    yrange = [y_star[0], y_star[-1]]
    VX_pred = vx_pred.reshape(X_star.shape)
    VY_pred = vy_pred.reshape(X_star.shape)
    S_pred = s_pred.reshape(X_star.shape)
    Fp_pred = fp_pred.reshape(X_star.shape)
    Fs_pred = fs_pred.reshape(X_star.shape)
    Fv_pred = fv_pred.reshape(X_star.shape)
    # Pressure from velocity; division by K is disabled while the model
    # treats K as uniform (see the commented alternative).
    #P_pred = computePressure(x_star,y_star,-VX_pred/K,-VY_pred/K)
    P_pred = computePressure(x_star, y_star, -VX_pred, -VY_pred)
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(331)
    plt.pcolor(X_star, Y_star, VX_pred, cmap='jet')
    plt.colorbar()
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('Vx at time %f' % t)
    ax = fig.add_subplot(332)
    plt.pcolor(X_star, Y_star, VY_pred, cmap='jet')
    plt.colorbar()
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('Vy at time %f' % t)
    ax = fig.add_subplot(333)
    # Speed magnitude on a log scale: log|v| = 0.5*log(vx^2 + vy^2).
    plt.pcolor(X_star, Y_star, 0.5*np.log(VY_pred**2+VX_pred**2), cmap='jet')
    plt.colorbar()
    plt.streamplot(x_star, y_star, VX_pred, VY_pred, color='w')
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('log(V) at time %f' % t)
    ax = fig.add_subplot(334)
    plt.pcolor(X_star, Y_star, K, cmap='jet')
    plt.colorbar()
    plt.streamplot(x_star, y_star, VX_pred, VY_pred, color='w')
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('K')
    ax = fig.add_subplot(335)
    plt.pcolor(X_star, Y_star, S_pred, cmap='jet', vmin = 0.3, vmax = 1)
    plt.colorbar()
    plt.streamplot(x_star, y_star, VX_pred, VY_pred, color='w')
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('S at time %f' % t)
    ax = fig.add_subplot(336)
    plt.pcolor(X_star, Y_star, P_pred, cmap='jet')
    plt.colorbar()
    plt.streamplot(x_star, y_star, VX_pred, VY_pred, density = 2, color='w')
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('pressure at time %f' % t)
    ax = fig.add_subplot(337)
    # Residual panels clip the color scale to the 1st-99th percentile.
    plt.pcolor(X_star, Y_star, Fp_pred, cmap='jet', vmin = np.percentile(Fp_pred,1), vmax = np.percentile(Fp_pred,99))
    plt.colorbar()
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('F pressure at time %f' % t)
    ax = fig.add_subplot(338)
    plt.pcolor(X_star, Y_star, Fs_pred, cmap='jet', vmin = np.percentile(Fs_pred,1), vmax = np.percentile(Fs_pred,99))
    plt.colorbar()
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('F saturation at time %f' % t)
    ax = fig.add_subplot(339)
    plt.pcolor(X_star, Y_star, Fv_pred, cmap='jet', vmin = np.percentile(Fv_pred,1), vmax = np.percentile(Fv_pred,99))
    plt.colorbar()
    plt.xlabel('x'), plt.ylabel('y')
    plt.xlim(xrange), plt.ylim(yrange)
    plt.axis('square')
    plt.title('F velocity at time %f' % t)
    plt.show()
The DNN architecture has to be much more complex than in the 1D case.
N_i = 32 #number of points to enforce on initial condition
N_b = 32 #number of points to enforce on boundary condition
N_f = 8192 #number of points to enforce on physics
# Alternative architectures kept for experimentation:
#layers = [2, 20, 20, 20, 20, 1]
# Note: layers[0] is overwritten in PhysicsInformedNN.__init__ to
# 3 + number of wells; outputs are (vx, vy, sw).
layers = [3, 128, 128, 128, 128, 128, 64, 64, 64, 32, 3]
#layers = [3, 64, 64, 64, 64, 64, 64, 32, 16, 8, 3]
#layers = [2, 32, 32, 32, 32, 32, 2]
#layers = [2, 16, 32, 32, 32, 32, 64, 64, 32, 32, 32, 32, 8, 4, 2]
# Physical parameters: mobility ratio gamma, porosity phi, rel-perm exponent n.
gamma, phi, relpermn = 0.2, 0.2, 2
# Tensor grid of candidate points: x, y in [-1, 1], t in [0, 1].
t = np.linspace(0,1,N_b)
x = np.linspace(-1,1,N_i)
y = np.linspace(-1,1,N_i)
X, Y, T = np.meshgrid(x, y, t)
Positive rates for injectors, negative rates for producers
# wells: [x, y, rate]; positive rate = injector, negative = producer.
wxyr = [[-0.75, 0.75, -0.20],
        [ 0.75, 0.75, -0.10],
        [-0.75, -0.75, -0.04],
        [ 0.75, -0.75, -0.26],
        [ 0.00, 0.00, 0.60]]
#wxyr = [[-0.65, 0.65, -0.20],
#        [ 0.65, -0.65, 0.20]]
WELLs = [{'x':w[0],'y':w[1],'rate':w[2]} for w in wxyr]
#sample local refined points around wells
n_refine = 64
# Latin-hypercube kernel of offsets in [-0.15, 0.15]^2, reused around every well.
XY_nearWell_kernel = 0.3*lhs(2, n_refine)-0.15
XY_nearWell = np.empty([0, 2])
for well in WELLs:
    XY_nearWell = np.r_[XY_nearWell, XY_nearWell_kernel+[well['x'],well['y']]]
# Alternative ring-based refinement kept for reference:
# XY_nearWell = []
# for well in WELLs:
#     XY_nearWell.append(sample_nearWell(n_refine,radius = 0.05, xy0 = [well['x'],well['y']]))
plt.figure()
plt.scatter(XY_nearWell_kernel[:,0], XY_nearWell_kernel[:,1])
plt.axis('equal')
# XY_nearWell = np.concatenate(XY_nearWell,axis=0)
#initial and boundary condition
# Initial saturation S = 0.2 is enforced on the full grid plus the near-well
# points, all at t = 0.
XY_initial_S = np.c_[X[:,:,0].flatten(),Y[:,:,0].flatten()]
XY_initial_S = np.r_[XY_initial_S,XY_nearWell]
XYT_initial_S = np.c_[XY_initial_S,np.zeros((XY_initial_S.shape[0],1),dtype=np.float32)]
S_initial = 0.2*np.ones((XYT_initial_S.shape[0],1))
# Closed-boundary condition: vx = 0 on x = +-1, vy = 0 on y = +-1, all times.
LeftB = np.c_[X[:,0,:].flatten(),Y[:,0,:].flatten(),T[:,0,:].flatten()]
RightB = np.c_[X[:,-1,:].flatten(),Y[:,-1,:].flatten(),T[:,-1,:].flatten()]
XYT_boundary_VX = np.r_[LeftB,RightB]
VX_boundary = np.zeros((XYT_boundary_VX.shape[0],1))
TopB = np.c_[X[0,:,:].flatten(),Y[0,:,:].flatten(),T[0,:,:].flatten()]
BotB = np.c_[X[-1,:,:].flatten(),Y[-1,:,:].flatten(),T[-1,:,:].flatten()]
XYT_boundary_VY = np.r_[TopB,BotB]
VY_boundary = np.zeros((XYT_boundary_VY.shape[0],1))
#collocation points
lb = np.array([x.min(), y.min(), t.min()])
ub = np.array([x.max(), y.max(), t.max()])
#randomly sample points
XYT_f = lb + (ub-lb)*lhs(3, N_f)
# Alternative: stratified-in-time collocation sampling, kept for reference.
# n_t = 17
# n_xy = N_f//n_t
# XYT_f = np.empty([0,3])
# for tmp_t in np.linspace(0,1,n_t):
#     tmp_XY_f = lb[:2] + (ub[:2]-lb[:2])*lhs(2, n_xy)
#     tmp_XYT_f = np.c_[tmp_XY_f,tmp_t*np.ones((n_xy,1))]
#     XYT_f = np.r_[XYT_f,tmp_XYT_f]
# Replicate the near-well points at 17 time slices in [0, 1].
T_nearWell = np.linspace(0,1,17).repeat(XY_nearWell.shape[0],axis=0)
XYT_nearWell = np.c_[np.tile(XY_nearWell,[17,1]),T_nearWell]
#assemble points
n_bx, n_by, n_i, n_f, n_nearWell = XYT_boundary_VX.shape[0], XYT_boundary_VY.shape[0], XYT_initial_S.shape[0], XYT_f.shape[0],XYT_nearWell.shape[0]
# Row order here must match the slice ranges stored in Indices below.
XYT = np.r_[XYT_boundary_VX, XYT_boundary_VY,XYT_initial_S,XYT_f,XYT_nearWell]
U = np.r_[VX_boundary, VY_boundary, S_initial]
Indices = {'vx': {'begin': 0, 'end': n_bx}, \
           'vy': {'begin': n_bx, 'end': n_bx+n_by}, \
           'sw': {'begin': n_bx+n_by, 'end': n_bx+n_by+n_i}}
print('Initial Condition: %d points' % n_i)
print('x boundary Condition: %d points' % n_bx)
print('y boundary Condition: %d points' % n_by)
print('random collocatiton : %d points' % XYT_f.shape[0])
print('near well local refine: %d points' % XYT_nearWell.shape[0])
print('i/b conditions : %d points' % U.shape[0])
print('total points : %d points' % XYT.shape[0])
Quickly plotting all control points in $xyt$ 2D+1D spacetime. Here is an explanation of what each color value means:
| color value | type of control points |
|---|---|
| 1 | points on $x=\pm1$ for boundary condition |
| 2 | points on $y=\pm1$ for boundary condition |
| 3 | points at $t=0$ for initial condition |
| 4 | random points for physics |
| 5 | refined points near wells |
# 5-color categorical map: one random color per control-point type.
cm = LinearSegmentedColormap.from_list('my_cm',np.random.rand(5,3),N=5)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111,projection='3d')
# Labels 1-5 follow the same row order as XYT: x-boundary, y-boundary,
# initial condition, random collocation, near-well refinement.
control_points_type = np.r_[np.ones(n_bx),2*np.ones(n_by),3*np.ones(n_i),4*np.ones(n_f),5*np.ones(n_nearWell)]
p = ax.scatter(XYT[:,0],XYT[:,1],XYT[:,2],c=control_points_type,cmap=cm,s=3)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
fig.colorbar(p)
ax.view_init(-20,30)
# 2D top-down view of the same points.
fig = plt.figure(figsize=(5,5))
plt.scatter(XYT[:,0],XYT[:,1], c=control_points_type, cmap=cm,s=1)
Non-uniform permeability model is not used yet
# 257x257 random permeability field on [-1, 1]^2.  Note the PDE residuals in
# net_u currently assume uniform K (v_eqn_RHS = 0), so this field is only used
# for plotting and for the (disabled) dlnK source term.
Kmodel = Permeability(np.linspace(-1,1,257),np.linspace(-1,1,257))
model = PhysicsInformedNN(XYT, U, Indices, layers, WELLs, Kmodel, gamma, phi, relpermn)
We first train the model with 500 iterations of Adam. Then train it with BFGS.
I plot $v_x$, $v_y$, $v$, $s$, $p$ and the mismatch in the pressure, saturation and velocity equations after every 10000 iterations. Some of the plots have streamlines overlaid, drawn by the matplotlib streamplot function. I am not sure how accurate the streamlines are.
# Alternate plotting and training rounds.  Adam warm-up happens only on the
# first round; train(0) skips Adam and runs only the L-BFGS-B pass.
for i in range(15):
    draw_prediction(0)
    start_time = time.time()
    if i<1:
        model.train(500)
    else:
        model.train(0)
    elapsed = time.time() - start_time
    print('Training time: %.4f' % (elapsed))
# Snapshots at later times.
draw_prediction(0.3)
draw_prediction(0.8)
# Parse the saved iteration log (console output captured to loss.txt) and plot
# each loss component on a log scale.
loss = {'Loss':[],'sw':[],'vx':[],'vy':[],'fp':[],'fs':[],'fv':[]}
with open('loss.txt','rb') as f:
    for line in f:
        line = line.decode('utf-8')
        # Only lines printed by callback(): "Iter: 00050, Loss: ..., sw: ..., ..."
        if (len(line)>4) and (line[:4]=='Iter'):
            # Split "name: value" pairs, skipping the leading "Iter: NNNNN" field.
            tmp = [item.split(':') for item in line.split(',')[1:]]
            for item in tmp:
                loss[item[0].lstrip()].append(float(item[1]))
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
for key in loss:
    l = len(loss[key])
    # The callback prints every 50 iterations, hence the x-axis step of 50.
    plt.plot(range(0,l*50,50),loss[key],label=key)
plt.xlabel('# Iteration')
plt.ylabel('Loss')
ax.set_yscale('log')
plt.legend()
ax.grid(linestyle=":",which='both')